README
Application research of machine learning in functional magnetic resonance imaging (fMRI) data analysis
Maximilian LOK
D19091100054
Last update: 20-04-21
# Download the development fMRI dataset (30 subjects) with nilearn.
from nilearn import datasets

rest_dataset = datasets.fetch_development_fmri(n_subjects=30)
func_filenames = rest_dataset.func

# Report where the first 4D functional NIfTI image landed on disk.
first_func = func_filenames[0]
print('First functional nifti image (4D) is at: %s' % first_func)
First functional nifti image (4D) is at: C:\Users\pc/nilearn_data\development_fmri\development_fmri\sub-pixar123_task-pixar_space-MNI152NLin2009cAsym_desc-preproc_bold.nii.gz
F:\Users\pc\anaconda3\lib\site-packages\nilearn\datasets\__init__.py:87: FutureWarning: Fetchers from the nilearn.datasets module will be updated in version 0.9 to return python strings instead of bytes and Pandas dataframes instead of Numpy arrays.
warn("Fetchers from the nilearn.datasets module will be "
CanICA (group independent component analysis)
# Run CanICA (group ICA) on all subjects' functional images.
from nilearn.decomposition import CanICA

canica = CanICA(
    n_components=20,
    memory="nilearn_cache",
    memory_level=2,
    verbose=10,
    mask_strategy='template',
    random_state=0,
)
canica.fit(func_filenames)

# The independent components in brain space are exposed directly
# through the fitted estimator's `components_img_` attribute.
canica_components_img = canica.components_img_

# `components_img_` is a Nifti image object; persist it to disk.
canica_components_img.to_filename('canica0251_resting_state.nii.gz')
[MultiNiftiMasker.fit] Loading data from [C:\Users\pc\nilearn_data\development_fmri\development_fmri\sub-pixar123_task-pixar_space-MNI152NLin2009cAsym_desc-preproc_bold.nii.gz, C:\Users\pc\nilearn_data\development_fmri\development_fmri\sub-pixar124_task-pixar_space-MNI152NLin2009cAsym_desc-preproc_bold.nii.gz, C:\Users\pc\nilearn_data\development_fmri\development_fmri\sub-pixar125_task-pixar_space-MNI152NLin2009cAsym_desc-preproc_bold.nii.gz, C:\Users\pc\nilearn_data\development_fmri\development_fmri\sub-pixar126_task-pixar_space-MNI152NLin2009cAsym_desc-preproc_bold.nii.gz, C:\Users\pc\nilearn_data\development_fmri\development_fmri\sub-pixar127_task-pixar_space-MNI152NLin2009cAsym_desc-preproc_bold.nii.gz, C:\Users\pc\nilearn_data\development_fmri\development_fmri\sub-pixar128_task-pixar_space-MNI152NLin2009cAsym_desc-preproc_bold.nii.gz, C:\Users\pc\nilearn_data\development_fmri\development_fmri\sub-pixar001_task-pixar_space-MNI152NLin2009cAsym_desc-preproc_bold.nii.gz, C:\Users\pc\nilearn_data\development_fmri\development_fmri\sub-pixar002_task-pixar_space-MNI152NLin2009cAsym_desc-preproc_bold.nii.gz, C:\Users\pc\nilearn_data\development_fmri\development_fmri\sub-pixar003_task-pixar_space-MNI152NLin2009cAsym_desc-preproc_bold.nii.gz, C:\Users\pc\nilearn_data\development_fmri\development_fmri\sub-pixar004_task-pixar_space-MNI152NLin2009cAsym_desc-preproc_bold.nii.gz, C:\Users\pc\nilearn_data\development_fmri\development_fmri\sub-pixar005_task-pixar_space-MNI152NLin2009cAsym_desc-preproc_bold.nii.gz, C:\Users\pc\nilearn_data\development_fmri\development_fmri\sub-pixar006_task-pixar_space-MNI152NLin2009cAsym_desc-preproc_bold.nii.gz, C:\Users\pc\nilearn_data\development_fmri\development_fmri\sub-pixar007_task-pixar_space-MNI152NLin2009cAsym_desc-preproc_bold.nii.gz, C:\Users\pc\nilearn_data\development_fmri\development_fmri\sub-pixar008_task-pixar_space-MNI152NLin2009cAsym_desc-preproc_bold.nii.gz, 
C:\Users\pc\nilearn_data\development_fmri\development_fmri\sub-pixar009_task-pixar_space-MNI152NLin2009cAsym_desc-preproc_bold.nii.gz, C:\Users\pc\nilearn_data\development_fmri\development_fmri\sub-pixar010_task-pixar_space-MNI152NLin2009cAsym_desc-preproc_bold.nii.gz, C:\Users\pc\nilearn_data\development_fmri\development_fmri\sub-pixar011_task-pixar_space-MNI152NLin2009cAsym_desc-preproc_bold.nii.gz, C:\Users\pc\nilearn_data\development_fmri\development_fmri\sub-pixar012_task-pixar_space-MNI152NLin2009cAsym_desc-preproc_bold.nii.gz, C:\Users\pc\nilearn_data\development_fmri\development_fmri\sub-pixar013_task-pixar_space-MNI152NLin2009cAsym_desc-preproc_bold.nii.gz, C:\Users\pc\nilearn_data\development_fmri\development_fmri\sub-pixar014_task-pixar_space-MNI152NLin2009cAsym_desc-preproc_bold.nii.gz, C:\Users\pc\nilearn_data\development_fmri\development_fmri\sub-pixar015_task-pixar_space-MNI152NLin2009cAsym_desc-preproc_bold.nii.gz, C:\Users\pc\nilearn_data\development_fmri\development_fmri\sub-pixar016_task-pixar_space-MNI152NLin2009cAsym_desc-preproc_bold.nii.gz, C:\Users\pc\nilearn_data\development_fmri\development_fmri\sub-pixar017_task-pixar_space-MNI152NLin2009cAsym_desc-preproc_bold.nii.gz, C:\Users\pc\nilearn_data\development_fmri\development_fmri\sub-pixar018_task-pixar_space-MNI152NLin2009cAsym_desc-preproc_bold.nii.gz, C:\Users\pc\nilearn_data\development_fmri\development_fmri\sub-pixar019_task-pixar_space-MNI152NLin2009cAsym_desc-preproc_bold.nii.gz, C:\Users\pc\nilearn_data\development_fmri\development_fmri\sub-pixar020_task-pixar_space-MNI152NLin2009cAsym_desc-preproc_bold.nii.gz, C:\Users\pc\nilearn_data\development_fmri\development_fmri\sub-pixar021_task-pixar_space-MNI152NLin2009cAsym_desc-preproc_bold.nii.gz, C:\Users\pc\nilearn_data\development_fmri\development_fmri\sub-pixar022_task-pixar_space-MNI152NLin2009cAsym_desc-preproc_bold.nii.gz, 
C:\Users\pc\nilearn_data\development_fmri\development_fmri\sub-pixar023_task-pixar_space-MNI152NLin2009cAsym_desc-preproc_bold.nii.gz, C:\Users\pc\nilearn_data\development_fmri\development_fmri\sub-pixar024_task-pixar_space-MNI152NLin2009cAsym_desc-preproc_bold.nii.gz] [MultiNiftiMasker.fit] Computing mask [MultiNiftiMasker.transform] Resampling mask [CanICA] Loading data [Memory]0.6s, 0.0min : Loading randomized_svd from nilearn_cache\joblib\sklearn\utils\extmath\randomized_svd\25b1efce2c2846820669f025817176d2 ______________________________________randomized_svd cache loaded - 0.0s, 0.0min
[Parallel(n_jobs=1)]: Using backend SequentialBackend with 1 concurrent workers. [Parallel(n_jobs=1)]: Done 1 out of 1 | elapsed: 0.0s remaining: 0.0s [Parallel(n_jobs=1)]: Done 2 out of 2 | elapsed: 0.1s remaining: 0.0s
[Memory]0.9s, 0.0min : Loading fastica from nilearn_cache\joblib\sklearn\decomposition\_fastica\fastica\4b287744e61a60e72cc8ee4cb2807070 _____________________________________________fastica cache loaded - 0.0s, 0.0min [Memory]1.0s, 0.0min : Loading fastica from nilearn_cache\joblib\sklearn\decomposition\_fastica\fastica\fc307a329034fc768eb2883fad7b5e70 _____________________________________________fastica cache loaded - 0.0s, 0.0min [Memory]1.1s, 0.0min : Loading fastica from nilearn_cache\joblib\sklearn\decomposition\_fastica\fastica\8d5b024654a82c1e8ddd6c6970eae9b5 _____________________________________________fastica cache loaded - 0.0s, 0.0min [Memory]1.1s, 0.0min : Loading fastica from nilearn_cache\joblib\sklearn\decomposition\_fastica\fastica\7c31d23a7eb24ee90d030597469c8e69 _____________________________________________fastica cache loaded - 0.0s, 0.0min [Memory]1.2s, 0.0min : Loading fastica from nilearn_cache\joblib\sklearn\decomposition\_fastica\fastica\7c3602a4a0ee7c8fd3a530804ca9a470 _____________________________________________fastica cache loaded - 0.0s, 0.0min [Memory]1.3s, 0.0min : Loading fastica from nilearn_cache\joblib\sklearn\decomposition\_fastica\fastica\bbc81823c9876952af17d5909cacf80c
[Parallel(n_jobs=1)]: Done 3 out of 3 | elapsed: 0.2s remaining: 0.0s [Parallel(n_jobs=1)]: Done 4 out of 4 | elapsed: 0.2s remaining: 0.0s [Parallel(n_jobs=1)]: Done 5 out of 5 | elapsed: 0.3s remaining: 0.0s
_____________________________________________fastica cache loaded - 0.0s, 0.0min [Memory]1.4s, 0.0min : Loading fastica from nilearn_cache\joblib\sklearn\decomposition\_fastica\fastica\98ffcf9048ab4f755e6b30e43af62f11 _____________________________________________fastica cache loaded - 0.0s, 0.0min [Memory]1.5s, 0.0min : Loading fastica from nilearn_cache\joblib\sklearn\decomposition\_fastica\fastica\329e2778ecc8a4a36015fa7b28b03310 _____________________________________________fastica cache loaded - 0.0s, 0.0min [Memory]1.5s, 0.0min : Loading fastica from nilearn_cache\joblib\sklearn\decomposition\_fastica\fastica\98e660d43cc6c0ad3b3fd86d203e955a
[Parallel(n_jobs=1)]: Done 6 out of 6 | elapsed: 0.4s remaining: 0.0s [Parallel(n_jobs=1)]: Done 7 out of 7 | elapsed: 0.5s remaining: 0.0s [Parallel(n_jobs=1)]: Done 8 out of 8 | elapsed: 0.5s remaining: 0.0s
_____________________________________________fastica cache loaded - 0.0s, 0.0min [Memory]1.6s, 0.0min : Loading fastica from nilearn_cache\joblib\sklearn\decomposition\_fastica\fastica\85eee172315dcf54043a15252c68bd11
[Parallel(n_jobs=1)]: Done 9 out of 9 | elapsed: 0.6s remaining: 0.0s
_____________________________________________fastica cache loaded - 0.1s, 0.0min
[Parallel(n_jobs=1)]: Done 10 out of 10 | elapsed: 0.8s finished
# No separate NiftiMasker post-processing was run, so the CanICA
# components can be plotted directly as a probabilistic atlas.
from nilearn.plotting import plot_prob_atlas

plot_prob_atlas(canica_components_img, title='All ICA components')

# Plot each independent component on its own axial (z) slice.
from nilearn.image import iter_img
from nilearn.plotting import plot_stat_map, show

for i, cur_img in enumerate(iter_img(canica_components_img)):
    plot_stat_map(cur_img, display_mode="z", title="IC %d" % i,
                  cut_coords=1, colorbar=True)
F:\Users\pc\anaconda3\lib\site-packages\nilearn\plotting\displays.py:1750: MatplotlibDeprecationWarning: Adding an axes using the same arguments as a previous axes currently reuses the earlier instance. In a future version, a new instance will always be created and returned. Meanwhile, this warning can be suppressed, and the future behavior ensured, by passing a unique label to each axes instance. ax = fh.add_axes([fraction * index * (x1 - x0) + x0, y0,
#############################################
#############################################
#############################################
# Fit a dictionary-learning decomposition on the same functional data,
# with the same number of components and masking strategy as CanICA.
from nilearn.decomposition import DictLearning

dict_learning = DictLearning(n_components=20,
                             memory="nilearn_cache", memory_level=2,
                             verbose=1,
                             random_state=0,
                             n_epochs=1,
                             mask_strategy='template')

# Typo fixed in the log message below ("dicitonary" -> "dictionary").
print('[Example] Fitting dictionary learning model')
dict_learning.fit(func_filenames)
print('[Example] Saving results')

# NOTE(review): the issue below may occur; check the website for the
# details. nilearn must be updated to at least 0.4.1 or this crashes.
# (tested 2021-04-22)
# Grab the extracted components, unmasked back into a Nifti image.
# Note: for nilearn versions older than 0.4.1, `components_img_`
# is not implemented. See the note section above for details.
dictlearning_components_img = dict_learning.components_img_
dictlearning_components_img.to_filename('dictionary_learning_resting_state.nii.gz')
[Example] Fitting dicitonary learning model [MultiNiftiMasker.fit] Loading data from [C:\Users\pc\nilearn_data\development_fmri\development_fmri\sub-pixar123_task-pixar_space-MNI152NLin2009cAsym_desc-preproc_bold.nii.gz, C:\Users\pc\nilearn_data\development_fmri\development_fmri\sub-pixar124_task-pixar_space-MNI152NLin2009cAsym_desc-preproc_bold.nii.gz, C:\Users\pc\nilearn_data\development_fmri\development_fmri\sub-pixar125_task-pixar_space-MNI152NLin2009cAsym_desc-preproc_bold.nii.gz, C:\Users\pc\nilearn_data\development_fmri\development_fmri\sub-pixar126_task-pixar_space-MNI152NLin2009cAsym_desc-preproc_bold.nii.gz, C:\Users\pc\nilearn_data\development_fmri\development_fmri\sub-pixar127_task-pixar_space-MNI152NLin2009cAsym_desc-preproc_bold.nii.gz, C:\Users\pc\nilearn_data\development_fmri\development_fmri\sub-pixar128_task-pixar_space-MNI152NLin2009cAsym_desc-preproc_bold.nii.gz, C:\Users\pc\nilearn_data\development_fmri\development_fmri\sub-pixar001_task-pixar_space-MNI152NLin2009cAsym_desc-preproc_bold.nii.gz, C:\Users\pc\nilearn_data\development_fmri\development_fmri\sub-pixar002_task-pixar_space-MNI152NLin2009cAsym_desc-preproc_bold.nii.gz, C:\Users\pc\nilearn_data\development_fmri\development_fmri\sub-pixar003_task-pixar_space-MNI152NLin2009cAsym_desc-preproc_bold.nii.gz, C:\Users\pc\nilearn_data\development_fmri\development_fmri\sub-pixar004_task-pixar_space-MNI152NLin2009cAsym_desc-preproc_bold.nii.gz, C:\Users\pc\nilearn_data\development_fmri\development_fmri\sub-pixar005_task-pixar_space-MNI152NLin2009cAsym_desc-preproc_bold.nii.gz, C:\Users\pc\nilearn_data\development_fmri\development_fmri\sub-pixar006_task-pixar_space-MNI152NLin2009cAsym_desc-preproc_bold.nii.gz, C:\Users\pc\nilearn_data\development_fmri\development_fmri\sub-pixar007_task-pixar_space-MNI152NLin2009cAsym_desc-preproc_bold.nii.gz, C:\Users\pc\nilearn_data\development_fmri\development_fmri\sub-pixar008_task-pixar_space-MNI152NLin2009cAsym_desc-preproc_bold.nii.gz, 
C:\Users\pc\nilearn_data\development_fmri\development_fmri\sub-pixar009_task-pixar_space-MNI152NLin2009cAsym_desc-preproc_bold.nii.gz, C:\Users\pc\nilearn_data\development_fmri\development_fmri\sub-pixar010_task-pixar_space-MNI152NLin2009cAsym_desc-preproc_bold.nii.gz, C:\Users\pc\nilearn_data\development_fmri\development_fmri\sub-pixar011_task-pixar_space-MNI152NLin2009cAsym_desc-preproc_bold.nii.gz, C:\Users\pc\nilearn_data\development_fmri\development_fmri\sub-pixar012_task-pixar_space-MNI152NLin2009cAsym_desc-preproc_bold.nii.gz, C:\Users\pc\nilearn_data\development_fmri\development_fmri\sub-pixar013_task-pixar_space-MNI152NLin2009cAsym_desc-preproc_bold.nii.gz, C:\Users\pc\nilearn_data\development_fmri\development_fmri\sub-pixar014_task-pixar_space-MNI152NLin2009cAsym_desc-preproc_bold.nii.gz, C:\Users\pc\nilearn_data\development_fmri\development_fmri\sub-pixar015_task-pixar_space-MNI152NLin2009cAsym_desc-preproc_bold.nii.gz, C:\Users\pc\nilearn_data\development_fmri\development_fmri\sub-pixar016_task-pixar_space-MNI152NLin2009cAsym_desc-preproc_bold.nii.gz, C:\Users\pc\nilearn_data\development_fmri\development_fmri\sub-pixar017_task-pixar_space-MNI152NLin2009cAsym_desc-preproc_bold.nii.gz, C:\Users\pc\nilearn_data\development_fmri\development_fmri\sub-pixar018_task-pixar_space-MNI152NLin2009cAsym_desc-preproc_bold.nii.gz, C:\Users\pc\nilearn_data\development_fmri\development_fmri\sub-pixar019_task-pixar_space-MNI152NLin2009cAsym_desc-preproc_bold.nii.gz, C:\Users\pc\nilearn_data\development_fmri\development_fmri\sub-pixar020_task-pixar_space-MNI152NLin2009cAsym_desc-preproc_bold.nii.gz, C:\Users\pc\nilearn_data\development_fmri\development_fmri\sub-pixar021_task-pixar_space-MNI152NLin2009cAsym_desc-preproc_bold.nii.gz, C:\Users\pc\nilearn_data\development_fmri\development_fmri\sub-pixar022_task-pixar_space-MNI152NLin2009cAsym_desc-preproc_bold.nii.gz, 
C:\Users\pc\nilearn_data\development_fmri\development_fmri\sub-pixar023_task-pixar_space-MNI152NLin2009cAsym_desc-preproc_bold.nii.gz, C:\Users\pc\nilearn_data\development_fmri\development_fmri\sub-pixar024_task-pixar_space-MNI152NLin2009cAsym_desc-preproc_bold.nii.gz] [MultiNiftiMasker.fit] Computing mask [MultiNiftiMasker.transform] Resampling mask [DictLearning] Loading data [DictLearning] Learning initial components
[Parallel(n_jobs=1)]: Using backend SequentialBackend with 1 concurrent workers. [Parallel(n_jobs=1)]: Done 1 out of 1 | elapsed: 0.4s finished
[DictLearning] Computing initial loadings [DictLearning] Learning dictionary [Example] Saving results
# The NiftiMasker has already been run during fitting, so the ICA step
# cannot be re-run here; just inspect and plot the fitted components.
# (checked 2021-04-22)
dictlearning_components_img

plot_prob_atlas(dictlearning_components_img,
                title='All DictLearning components')
F:\Users\pc\anaconda3\lib\site-packages\nilearn\plotting\displays.py:101: UserWarning: linewidths is ignored by contourf im = getattr(ax, type)(data_2d.copy(), F:\Users\pc\anaconda3\lib\site-packages\nilearn\plotting\displays.py:101: UserWarning: No contour levels were found within the data range. im = getattr(ax, type)(data_2d.copy(),
<nilearn.plotting.displays.OrthoSlicer at 0x1feabc6ea30>
# NOTE(review): the display axes could not be changed here despite
# several attempts.
for i, comp_img in enumerate(iter_img(dictlearning_components_img)):
    plot_stat_map(comp_img, display_mode="z", title="Comp %d" % i,
                  cut_coords=1, colorbar=True)
F:\Users\pc\anaconda3\lib\site-packages\nilearn\plotting\displays.py:1750: MatplotlibDeprecationWarning: Adding an axes using the same arguments as a previous axes currently reuses the earlier instance. In a future version, a new instance will always be created and returned. Meanwhile, this warning can be suppressed, and the future behavior ensured, by passing a unique label to each axes instance. ax = fh.add_axes([fraction * index * (x1 - x0) + x0, y0,
(The deprecation warning above is emitted by matplotlib.)
# Per-component explained-variance scores of the dictionary-learning
# model on the training images.
scores = dict_learning.score(func_filenames, per_component=True)

# Plot the scores as a horizontal bar chart (one bar per component),
# comparing all DictLearning ICs, following the website tutorial.
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.ticker import FormatStrFormatter

plt.figure(figsize=(4, 4))
positions = np.arange(len(scores))
plt.barh(positions, scores)
plt.ylabel('Component #', size=12)
plt.xlabel('Explained variance', size=12)
# Tick every component; derived from the data instead of the original
# hard-coded `np.arange(20)` so it stays correct if n_components changes.
plt.yticks(positions)
plt.gca().xaxis.set_major_formatter(FormatStrFormatter('%.3f'))
plt.tight_layout()
show()
Used for the network connectome analysis below.
# Region extraction from the dictionary-learning maps.
# threshold=0.5 keeps a nominal amount of nonzero voxels across all
# maps; lowering the threshold lets more intense voxels survive.
# NOTE(review): the threshold is adjustable, but it is left at the
# tutorial value to keep the outcome comparable with the tutorial.
from nilearn.regions import RegionExtractor

extractor = RegionExtractor(
    dictlearning_components_img,
    threshold=0.5,
    thresholding_strategy='ratio_n_voxels',
    extractor='local_regions',
    standardize=True,
    min_region_size=1350,
)
# fit() performs the actual region extraction.
extractor.fit()

# The extracted regions are stored in `regions_img_` ...
regions_extracted_img = extractor.regions_img_
# ... and each region's source-component index in `index_`.
regions_index = extractor.index_
# The last axis of the 4D image counts the extracted regions.
n_regions_extracted = regions_extracted_img.shape[-1]
# Visualization of region extraction results
# Build the visualization title. The component count is 20, matching
# the n_components used for the DictLearning decomposition (the
# original hard-coded 12, which did not match the fitted model).
title = ('%d regions are extracted from %d components.'
         '\nEach separate color of region indicates extracted region'
         % (n_regions_extracted, 20))

# Sanity check: inspect the extracted-regions image.
regions_extracted_img
<nibabel.nifti1.Nifti1Image at 0x1feaa173ee0>
n_regions_extracted
37
# Plot all extracted regions as filled contours in a single atlas view.
from nilearn import plotting

plotting.plot_prob_atlas(regions_extracted_img,
                         view_type='filled_contours',
                         title=title, colorbar=True)
F:\Users\pc\anaconda3\lib\site-packages\numpy\ma\core.py:2831: UserWarning: Warning: converting a masked element to nan. _data = np.array(data, dtype=dtype, copy=copy,
<nilearn.plotting.displays.OrthoSlicer at 0x1feab7020d0>
# Plot every dictionary-learning component on a coronal (y) slice,
# then again on an axial (z) slice.
# NOTE(review): despite the "exComp" titles, these loops plot the raw
# DictLearning components, not the extracted regions — confirm whether
# `regions_extracted_img` was intended here.
for j, comp_img in enumerate(iter_img(dictlearning_components_img)):
    plot_stat_map(comp_img, display_mode="y", title="exComp %d" % j,
                  cut_coords=1, colorbar=True)
for j, comp_img in enumerate(iter_img(dictlearning_components_img)):
    plot_stat_map(comp_img, display_mode="z", title="exComp %d" % j,
                  cut_coords=1, colorbar=True)
# Subject-level connectivity: first extract each subject's region
# time series with the fitted RegionExtractor (transform() on each
# file in func_filenames), then estimate a correlation matrix per
# subject with nilearn's connectome utilities.
from nilearn.connectome import ConnectivityMeasure

confounds = rest_dataset.confounds
correlations = []
# kind='correlation' yields plain Pearson correlation matrices.
connectome_measure = ConnectivityMeasure(kind='correlation')

for filename, confound in zip(func_filenames, confounds):
    # transform() extracts this subject's region time series,
    # regressing out the supplied confounds.
    subject_timeseries = extractor.transform(filename, confounds=confound)
    # fit_transform expects a list of subjects; pass one at a time
    # and collect each resulting correlation matrix.
    correlations.append(
        connectome_measure.fit_transform([subject_timeseries]))

# Average the per-subject matrices into one group-level matrix.
import numpy as np
mean_correlations = np.mean(correlations, axis=0).reshape(
    n_regions_extracted, n_regions_extracted)
mean_correlations
array([[ 1. , -0.15657705, 0.08295416, ..., 0.02827156,
0.14643293, 0.132149 ],
[-0.15657705, 1. , 0.09462031, ..., -0.00497102,
0.05321047, 0.02406557],
[ 0.08295416, 0.09462031, 1. , ..., 0.16956924,
0.24817098, 0.15168663],
...,
[ 0.02827156, -0.00497102, 0.16956924, ..., 1. ,
0.28426346, 0.28031608],
[ 0.14643293, 0.05321047, 0.24817098, ..., 0.28426346,
1. , 0.258771 ],
[ 0.132149 , 0.02406557, 0.15168663, ..., 0.28031608,
0.258771 , 1. ]])
# Visualize the group connectivity: first the correlation matrix,
# then a connectome drawn over the region centers.
from nilearn import plotting

title = 'Correlation between %d regions' % n_regions_extracted

# Matrix view, with the color scale pinned to [-1, 1].
display = plotting.plot_matrix(mean_correlations,
                               vmax=1, vmin=-1,
                               colorbar=True, title=title)

# Locate the center of each probabilistic region, then draw only the
# strongest 10% of edges between those coordinates.
regions_img = regions_extracted_img
coords_connectome = plotting.find_probabilistic_atlas_cut_coords(regions_img)
plotting.plot_connectome(mean_correlations, coords_connectome,
                         edge_threshold='90%', title=title)
<nilearn.plotting.displays.OrthoProjector at 0x1feab748130>
# Show one specific network: plot map index 9 of the DictLearning
# components without region extraction (left plot).
# NOTE(review): the original comments said "index=4", but the code
# actually uses index 9 throughout.
from nilearn import image

img = image.index_img(dictlearning_components_img, 9)
coords = plotting.find_xyz_cut_coords(img)
display = plotting.plot_stat_map(img, cut_coords=coords, colorbar=False,
                                 title='Showing one specific network')

# Collect the indices of all extracted regions that came from this
# original network (component 9).
regions_indices_of_map3 = np.where(np.array(regions_index) == 9)

display = plotting.plot_anat(cut_coords=coords,
                             title='Regions from this network')

# Overlay each of those regions in its own color.
colors = 'rgbcmyk'
for region_idx, color in zip(regions_indices_of_map3[0], colors):
    display.add_overlay(
        image.index_img(regions_extracted_img, region_idx),
        cmap=plotting.cm.alpha_cmap(color))
plotting.show()
F:\Users\pc\anaconda3\lib\site-packages\numpy\ma\core.py:2831: UserWarning: Warning: converting a masked element to nan. _data = np.array(data, dtype=dtype, copy=copy,
# Interactive, browser-based connectome over the extracted regions,
# thresholded at the strongest 10% of edges.
view37 = plotting.view_connectome(mean_correlations,
                                  coords_connectome,
                                  edge_threshold='90%',
                                  title=title)
view37